import os
import shutil
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from torchvision import datasets, models, transforms
from torchvision.utils import save_image
from torch.utils.tensorboard import SummaryWriter
TBOARD_LOGS = os.path.join(os.getcwd(), "tboard_logs", "test")
if not os.path.exists(TBOARD_LOGS):
    os.makedirs(TBOARD_LOGS)
print(TBOARD_LOGS)
C:\Users\Pahul\Downloads\Coding\Cuda lab\tboard_logs\test
# clear any stale logs from previous runs; the SummaryWriter below recreates the directory
shutil.rmtree(TBOARD_LOGS)
writer = SummaryWriter(TBOARD_LOGS)
print(writer)
<torch.utils.tensorboard.writer.SummaryWriter object at 0x000001B4BF883CA0>
randoms = []
for i in range(int(1e4)):
    log = np.log10(i + 1)
    random = np.random.randn()
    randoms.append(random)
    uniform = np.random.rand() * 2 - 1
    writer.add_scalar('Log Number', log, global_step=i)
    writer.add_scalars('Random Stuff', {
        'Gaussian': random,
        'Uniform': uniform
    }, i)
    if i % 1000 == 0:
        # adding random figures
        imgs = torch.randn(16, 1, 16, 16)
        grid = torchvision.utils.make_grid(imgs)
        writer.add_image('images', grid, global_step=i)
        # adding distribution
        writer.add_histogram("Sampled Gaussian", np.array(randoms), global_step=i)
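# The runs can then be inspected in a browser; a usage sketch (run from a
# shell, not from Python; the logdir matches the directory used above):
#   tensorboard --logdir tboard_logs
writer.flush()  # make sure pending events are written to disk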
GAN_LOGS = os.path.join(os.getcwd(), "tboard_logs", "gan")
if not os.path.exists(GAN_LOGS):
    os.makedirs(GAN_LOGS)
# clear stale logs; the SummaryWriter below recreates the directory
shutil.rmtree(GAN_LOGS)
writer = SummaryWriter(GAN_LOGS)
if not os.path.exists("imgs"):
    os.makedirs("imgs")
if not os.path.exists("imgs/training"):
    os.makedirs("imgs/training")
# Downloading and Loading Dataset
cifar_tf = transforms.Compose([
    transforms.ToTensor(),
    #transforms.Pad(2)
])
train_dataset = datasets.CIFAR10(root='./data', train=True, transform=cifar_tf, download=True)
#test_dataset = datasets.CIFAR10(root='./data', train=False, transform=cifar_tf, download=True)
Files already downloaded and verified
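# Hedged aside: the generator below ends in Tanh, so its samples live in
# [-1, 1], while ToTensor alone yields real images in [0, 1]. Many DCGAN
# setups therefore normalize the real images into [-1, 1] so real and fake
# batches share a scale. A minimal sketch, not used above; the (0.5, 0.5, 0.5)
# statistics are the conventional choice, not dataset-exact:
norm_tf = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),  # maps [0, 1] -> [-1, 1]
])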
b_size = 128
image_size = 64  # defined but not used below; CIFAR-10 images are 32x32
# Number of channels in the training images. For color images this is 3
nc = 3
# Size of z latent vector (i.e. size of generator input)
nz = 100
# Size of feature maps in generator
ngf = 32
# Size of feature maps in discriminator
ndf = 32
# Number of training epochs
num_epochs = 25
# Learning rate for optimizers
lr = 0.0003
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
# Number of GPUs available. Use 0 for CPU mode.
ngpu = 1
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=b_size, shuffle=True)
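# A quick sanity check (a sketch): CIFAR-10 batches should come out as
# [b_size, 3, 32, 32] image tensors with [b_size] integer labels.
images, labels = next(iter(train_loader))
print(images.shape, labels.shape)  # expected: torch.Size([128, 3, 32, 32]) torch.Size([128])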
# custom weights initialization called on netG and netD
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
class Generator(nn.Module):
    """
    A convolutional DCGAN-style generator using LeakyReLU activations.
    Takes a latent vector as input and outputs a fake sample.
    """
    def __init__(self):
        """ Model initializer """
        super().__init__()
        self.gen = nn.Sequential(
            # input is the latent vector: (nz) x 1 x 1
            nn.ConvTranspose2d(nz, ngf * 16, 2, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 16),
            nn.LeakyReLU(0.2),
            # state size. (ngf*16) x 2 x 2
            nn.ConvTranspose2d(ngf * 16, ngf * 8, 3, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.LeakyReLU(0.2),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.LeakyReLU(0.2),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.LeakyReLU(0.2),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 1, 1, 0, bias=False),
            # state size. (nc) x 32 x 32
            nn.Tanh()
        )

    def forward(self, x):
        """ Forward pass through generator """
        y = self.gen(x)
        return y
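# A minimal shape check (a sketch, not part of the training flow): a small
# latent batch should map to 3-channel 32x32 images, matching CIFAR-10.
_g = Generator()
print(_g(torch.randn(4, nz, 1, 1)).shape)  # expected: torch.Size([4, 3, 32, 32])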
class Discriminator(nn.Module):
    """ A convolutional DCGAN-style discriminator using LeakyReLU activations.
    Takes as input either a real or fake sample and predicts its authenticity.
    """
    def __init__(self):
        """ Module initializer """
        super().__init__()
        self.dis = nn.Sequential(
            # input is (nc) x 32 x 32
            nn.Conv2d(nc, ndf, 1, 1, 0, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        """ Forward pass """
        y = self.dis(x)
        return y
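# Corresponding check for the discriminator (a sketch): a 32x32 batch should
# collapse to one probability per image.
_d = Discriminator()
print(_d(torch.randn(4, nc, 32, 32)).shape)  # expected: torch.Size([4, 1, 1, 1])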
generator = Generator()
print(generator)
Generator(
  (gen): Sequential(
    (0): ConvTranspose2d(100, 512, kernel_size=(2, 2), stride=(1, 1), bias=False)
    (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2)
    (3): ConvTranspose2d(512, 256, kernel_size=(3, 3), stride=(1, 1), bias=False)
    (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (5): LeakyReLU(negative_slope=0.2)
    (6): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (7): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (8): LeakyReLU(negative_slope=0.2)
    (9): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (10): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (11): LeakyReLU(negative_slope=0.2)
    (12): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (13): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (14): LeakyReLU(negative_slope=0.2)
    (15): ConvTranspose2d(32, 3, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (16): Tanh()
  )
)
discriminator = Discriminator()
print(discriminator)
Discriminator(
  (dis): Sequential(
    (0): Conv2d(3, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (1): LeakyReLU(negative_slope=0.2, inplace=True)
    (2): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (4): LeakyReLU(negative_slope=0.2, inplace=True)
    (5): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (6): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (7): LeakyReLU(negative_slope=0.2, inplace=True)
    (8): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (9): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (10): LeakyReLU(negative_slope=0.2, inplace=True)
    (11): Conv2d(256, 1, kernel_size=(4, 4), stride=(1, 1), bias=False)
    (12): Sigmoid()
  )
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
netG = Generator().to(device)
netG.apply(weights_init)
print(netG)
Generator(
  (gen): Sequential(
    (0): ConvTranspose2d(100, 512, kernel_size=(2, 2), stride=(1, 1), bias=False)
    (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (2): LeakyReLU(negative_slope=0.2)
    (3): ConvTranspose2d(512, 256, kernel_size=(3, 3), stride=(1, 1), bias=False)
    (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (5): LeakyReLU(negative_slope=0.2)
    (6): ConvTranspose2d(256, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (7): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (8): LeakyReLU(negative_slope=0.2)
    (9): ConvTranspose2d(128, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (10): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (11): LeakyReLU(negative_slope=0.2)
    (12): ConvTranspose2d(64, 32, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (13): BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (14): LeakyReLU(negative_slope=0.2)
    (15): ConvTranspose2d(32, 3, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (16): Tanh()
  )
)
netD = Discriminator().to(device)
netD.apply(weights_init)
print(netD)
Discriminator(
  (dis): Sequential(
    (0): Conv2d(3, 32, kernel_size=(1, 1), stride=(1, 1), bias=False)
    (1): LeakyReLU(negative_slope=0.2, inplace=True)
    (2): Conv2d(32, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (3): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (4): LeakyReLU(negative_slope=0.2, inplace=True)
    (5): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (6): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (7): LeakyReLU(negative_slope=0.2, inplace=True)
    (8): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
    (9): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
    (10): LeakyReLU(negative_slope=0.2, inplace=True)
    (11): Conv2d(256, 1, kernel_size=(4, 4), stride=(1, 1), bias=False)
    (12): Sigmoid()
  )
)
# Initialize BCELoss function
criterion = nn.BCELoss()
# Create batch of latent vectors that we will use to visualize
# the progression of the generator
fixed_noise = torch.randn(64, nz, 1, 1, device=device)
# Establish convention for real and fake labels during training
real_label = 1.
fake_label = 0.
import torch.optim as optim
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr, betas=(beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr, betas=(beta1, 0.999))
import torchvision.utils as vutils
# Training Loop
# Lists to keep track of progress
img_list = []
G_losses = []
D_losses = []
iters = 0
print("Starting Training Loop...")
# For each epoch
for epoch in range(num_epochs):
    # For each batch in the dataloader
    for i, data in enumerate(train_loader, 0):
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        ## Train with all-real batch
        netD.zero_grad()
        # Format batch
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        label = torch.full((b_size,), real_label, dtype=torch.float, device=device)
        # Forward pass real batch through D
        output = netD(real_cpu).view(-1)
        # Calculate loss on all-real batch
        errD_real = criterion(output, label)
        # Calculate gradients for D in backward pass
        errD_real.backward()
        D_x = output.mean().item()

        ## Train with all-fake batch
        # Generate batch of latent vectors
        noise = torch.randn(b_size, nz, 1, 1, device=device)
        # Generate fake image batch with G
        fake = netG(noise)
        label.fill_(fake_label)
        # Classify all-fake batch with D; detach so no gradients reach G here
        output = netD(fake.detach()).view(-1)
        # Calculate D's loss on the all-fake batch
        errD_fake = criterion(output, label)
        # Calculate the gradients for this batch, accumulated (summed) with previous gradients
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        # Compute error of D as sum over the fake and the real batches
        errD = errD_real + errD_fake
        # Clip D's gradients for stability, then update D
        torch.nn.utils.clip_grad_norm_(netD.parameters(), 3.0)
        optimizerD.step()

        ############################
        # (2) Update G network: maximize log(D(G(z)))
        ###########################
        netG.zero_grad()
        label.fill_(real_label)  # fake labels are real for generator cost
        # Since we just updated D, perform another forward pass of all-fake batch through D
        output = netD(fake).view(-1)
        # Calculate G's loss based on this output
        errG = criterion(output, label)
        # Calculate gradients for G
        errG.backward()
        D_G_z2 = output.mean().item()
        # Update G
        optimizerG.step()

        # Log losses to TensorBoard
        writer.add_scalar('Loss/Discriminator Loss', errD.item(), global_step=iters)
        writer.add_scalar('Loss/Generator Loss', errG.item(), global_step=iters)
        writer.add_scalars('Comb_Loss/Losses', {
            'Discriminator': errD.item(),
            'Generator': errG.item()
        }, iters)

        # Output training stats
        if i % 50 == 0:
            print('[%d/%d][%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z)): %.4f / %.4f'
                  % (epoch, num_epochs, i, len(train_loader),
                     errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))

        # Save Losses for plotting later
        G_losses.append(errG.item())
        D_losses.append(errD.item())

        # Log a grid of G's output on fixed_noise to TensorBoard and to disk
        if iters % 500 == 0:
            with torch.no_grad():
                imgs = netG(fixed_noise).detach().cpu()
            grid = torchvision.utils.make_grid(imgs, nrow=8)
            writer.add_image('images', grid, global_step=iters)
            torchvision.utils.save_image(grid, os.path.join(os.getcwd(), "imgs", "training", f"imgs_{iters}.png"))

        # Check how the generator is doing by saving G's output on fixed_noise
        if (iters % 500 == 0) or ((epoch == num_epochs - 1) and (i == len(train_loader) - 1)):
            with torch.no_grad():
                fake = netG(fixed_noise).detach().cpu()
            img_list.append(vutils.make_grid(fake, padding=2, normalize=True))

        iters += 1
Starting Training Loop...
[0/25][0/391] Loss_D: 1.7983 Loss_G: 3.8897 D(x): 0.5513 D(G(z)): 0.6471 / 0.0294
[0/25][50/391] Loss_D: 0.0886 Loss_G: 13.3394 D(x): 0.9980 D(G(z)): 0.0826 / 0.0000
[0/25][100/391] Loss_D: 0.4823 Loss_G: 5.0414 D(x): 0.7793 D(G(z)): 0.0706 / 0.0381
[0/25][150/391] Loss_D: 1.1132 Loss_G: 3.6986 D(x): 0.4785 D(G(z)): 0.0224 / 0.0296
[0/25][200/391] Loss_D: 0.5055 Loss_G: 5.0389 D(x): 0.9709 D(G(z)): 0.3492 / 0.0079
[0/25][250/391] Loss_D: 1.4175 Loss_G: 2.6146 D(x): 0.4922 D(G(z)): 0.0899 / 0.1588
[0/25][300/391] Loss_D: 0.2908 Loss_G: 7.5466 D(x): 0.9538 D(G(z)): 0.1705 / 0.0018
[0/25][350/391] Loss_D: 0.1284 Loss_G: 8.4618 D(x): 0.9771 D(G(z)): 0.0879 / 0.0004
[1/25][0/391] Loss_D: 0.6401 Loss_G: 4.2917 D(x): 0.9807 D(G(z)): 0.4288 / 0.0299
[1/25][50/391] Loss_D: 0.9163 Loss_G: 3.6689 D(x): 0.6259 D(G(z)): 0.1310 / 0.0531
[1/25][100/391] Loss_D: 0.2010 Loss_G: 3.3443 D(x): 0.8777 D(G(z)): 0.0361 / 0.0618
[1/25][150/391] Loss_D: 1.1222 Loss_G: 4.3610 D(x): 0.4841 D(G(z)): 0.0222 / 0.0319
[1/25][200/391] Loss_D: 0.9412 Loss_G: 3.3285 D(x): 0.9051 D(G(z)): 0.4912 / 0.0636
[1/25][250/391] Loss_D: 0.7211 Loss_G: 1.8281 D(x): 0.6227 D(G(z)): 0.0559 / 0.2781
[1/25][300/391] Loss_D: 1.0640 Loss_G: 1.2503 D(x): 0.4849 D(G(z)): 0.1281 / 0.3309
[1/25][350/391] Loss_D: 1.0966 Loss_G: 4.7844 D(x): 0.7356 D(G(z)): 0.4600 / 0.0153
[2/25][0/391] Loss_D: 0.6094 Loss_G: 2.9813 D(x): 0.9788 D(G(z)): 0.4045 / 0.0800
[2/25][50/391] Loss_D: 0.6065 Loss_G: 2.8692 D(x): 0.8054 D(G(z)): 0.2053 / 0.1004
[2/25][100/391] Loss_D: 1.2396 Loss_G: 2.7224 D(x): 0.9520 D(G(z)): 0.5698 / 0.1405
[2/25][150/391] Loss_D: 0.6118 Loss_G: 3.4405 D(x): 0.7852 D(G(z)): 0.2657 / 0.0532
[2/25][200/391] Loss_D: 0.4236 Loss_G: 2.6641 D(x): 0.7400 D(G(z)): 0.0608 / 0.1542
[2/25][250/391] Loss_D: 0.5707 Loss_G: 4.5229 D(x): 0.8384 D(G(z)): 0.2714 / 0.0241
[2/25][300/391] Loss_D: 0.9293 Loss_G: 3.5049 D(x): 0.7130 D(G(z)): 0.3045 / 0.0817
[2/25][350/391] Loss_D: 1.0423 Loss_G: 2.9261 D(x): 0.7245 D(G(z)): 0.4001 / 0.0971
[3/25][0/391] Loss_D: 1.2330 Loss_G: 2.4204 D(x): 0.7140 D(G(z)): 0.5030 / 0.1193
[3/25][50/391] Loss_D: 0.7247 Loss_G: 1.2594 D(x): 0.6664 D(G(z)): 0.1586 / 0.3375
[3/25][100/391] Loss_D: 0.7132 Loss_G: 2.4523 D(x): 0.6876 D(G(z)): 0.2178 / 0.1271
[3/25][150/391] Loss_D: 0.5724 Loss_G: 3.4837 D(x): 0.7960 D(G(z)): 0.2442 / 0.0444
[3/25][200/391] Loss_D: 0.8030 Loss_G: 1.4304 D(x): 0.5740 D(G(z)): 0.1444 / 0.2720
[3/25][250/391] Loss_D: 0.9092 Loss_G: 4.1818 D(x): 0.7414 D(G(z)): 0.3811 / 0.0350
[3/25][300/391] Loss_D: 1.1605 Loss_G: 2.3844 D(x): 0.8000 D(G(z)): 0.5448 / 0.1285
[3/25][350/391] Loss_D: 0.6186 Loss_G: 2.4824 D(x): 0.8427 D(G(z)): 0.3145 / 0.1118
[4/25][0/391] Loss_D: 1.3877 Loss_G: 1.6157 D(x): 0.9334 D(G(z)): 0.6649 / 0.2601
[4/25][50/391] Loss_D: 0.7424 Loss_G: 3.0258 D(x): 0.8152 D(G(z)): 0.3539 / 0.0712
[4/25][100/391] Loss_D: 1.0501 Loss_G: 1.1948 D(x): 0.5558 D(G(z)): 0.2462 / 0.3476
[4/25][150/391] Loss_D: 1.7932 Loss_G: 2.1548 D(x): 0.2511 D(G(z)): 0.0506 / 0.1669
[4/25][200/391] Loss_D: 0.8284 Loss_G: 2.5038 D(x): 0.7700 D(G(z)): 0.3769 / 0.1135
[4/25][250/391] Loss_D: 0.9247 Loss_G: 2.1525 D(x): 0.6977 D(G(z)): 0.3542 / 0.1489
[4/25][300/391] Loss_D: 1.0698 Loss_G: 0.9084 D(x): 0.5457 D(G(z)): 0.2493 / 0.4553
[4/25][350/391] Loss_D: 0.9202 Loss_G: 2.0114 D(x): 0.8186 D(G(z)): 0.4573 / 0.1779
[5/25][0/391] Loss_D: 0.9932 Loss_G: 2.2815 D(x): 0.7634 D(G(z)): 0.4523 / 0.1401
[5/25][50/391] Loss_D: 1.2090 Loss_G: 2.6366 D(x): 0.8596 D(G(z)): 0.5966 / 0.0990
[5/25][100/391] Loss_D: 1.4582 Loss_G: 2.3454 D(x): 0.7482 D(G(z)): 0.6194 / 0.1318
[5/25][150/391] Loss_D: 0.9455 Loss_G: 1.3037 D(x): 0.5608 D(G(z)): 0.2148 / 0.3225
[5/25][200/391] Loss_D: 0.9415 Loss_G: 1.3553 D(x): 0.5519 D(G(z)): 0.1773 / 0.3039
[5/25][250/391] Loss_D: 1.1239 Loss_G: 1.9336 D(x): 0.8879 D(G(z)): 0.5781 / 0.1903
[5/25][300/391] Loss_D: 1.3595 Loss_G: 1.6428 D(x): 0.3869 D(G(z)): 0.1439 / 0.2643
[5/25][350/391] Loss_D: 0.9521 Loss_G: 2.5468 D(x): 0.7139 D(G(z)): 0.3982 / 0.1019
[6/25][0/391] Loss_D: 0.9215 Loss_G: 2.8661 D(x): 0.7729 D(G(z)): 0.4302 / 0.0827
[6/25][50/391] Loss_D: 0.7925 Loss_G: 3.1660 D(x): 0.7384 D(G(z)): 0.3327 / 0.0646
[6/25][100/391] Loss_D: 1.5379 Loss_G: 2.1139 D(x): 0.8133 D(G(z)): 0.6674 / 0.1747
[6/25][150/391] Loss_D: 0.8158 Loss_G: 0.7848 D(x): 0.6175 D(G(z)): 0.2264 / 0.4900
[6/25][200/391] Loss_D: 0.9971 Loss_G: 1.4904 D(x): 0.5485 D(G(z)): 0.2352 / 0.2758
[6/25][250/391] Loss_D: 1.2067 Loss_G: 1.1929 D(x): 0.5278 D(G(z)): 0.3294 / 0.3556
[6/25][300/391] Loss_D: 1.2118 Loss_G: 0.5665 D(x): 0.5060 D(G(z)): 0.3072 / 0.6040
[6/25][350/391] Loss_D: 1.8276 Loss_G: 0.6589 D(x): 0.2840 D(G(z)): 0.2106 / 0.5535
[7/25][0/391] Loss_D: 0.9075 Loss_G: 3.7370 D(x): 0.8077 D(G(z)): 0.4406 / 0.0341
[7/25][50/391] Loss_D: 0.6509 Loss_G: 3.1920 D(x): 0.7791 D(G(z)): 0.2893 / 0.0615
[7/25][100/391] Loss_D: 0.9715 Loss_G: 2.3927 D(x): 0.6439 D(G(z)): 0.3302 / 0.1201
[7/25][150/391] Loss_D: 0.9659 Loss_G: 1.4535 D(x): 0.8127 D(G(z)): 0.4910 / 0.2755
[7/25][200/391] Loss_D: 0.8202 Loss_G: 3.2838 D(x): 0.7798 D(G(z)): 0.3798 / 0.0594
[7/25][250/391] Loss_D: 0.8476 Loss_G: 2.6381 D(x): 0.8214 D(G(z)): 0.4405 / 0.0901
[7/25][300/391] Loss_D: 0.9832 Loss_G: 1.0769 D(x): 0.5372 D(G(z)): 0.1995 / 0.4107
[7/25][350/391] Loss_D: 0.9055 Loss_G: 2.4495 D(x): 0.8392 D(G(z)): 0.4638 / 0.1184
[8/25][0/391] Loss_D: 0.7162 Loss_G: 1.4072 D(x): 0.6698 D(G(z)): 0.2103 / 0.2869
[8/25][50/391] Loss_D: 0.8447 Loss_G: 3.1218 D(x): 0.7905 D(G(z)): 0.4013 / 0.0639
[8/25][100/391] Loss_D: 1.2527 Loss_G: 2.4446 D(x): 0.6383 D(G(z)): 0.4765 / 0.1235
[8/25][150/391] Loss_D: 0.8084 Loss_G: 2.8276 D(x): 0.6960 D(G(z)): 0.2938 / 0.0886
[8/25][200/391] Loss_D: 1.0695 Loss_G: 3.2215 D(x): 0.8396 D(G(z)): 0.5276 / 0.0630
[8/25][250/391] Loss_D: 0.6466 Loss_G: 3.0567 D(x): 0.8174 D(G(z)): 0.3141 / 0.0681
[8/25][300/391] Loss_D: 1.1699 Loss_G: 2.1202 D(x): 0.5446 D(G(z)): 0.3520 / 0.1512
[8/25][350/391] Loss_D: 0.7202 Loss_G: 3.8157 D(x): 0.7890 D(G(z)): 0.3327 / 0.0324
[9/25][0/391] Loss_D: 0.8246 Loss_G: 2.8396 D(x): 0.8148 D(G(z)): 0.4154 / 0.0794
[9/25][50/391] Loss_D: 1.4322 Loss_G: 0.9161 D(x): 0.9389 D(G(z)): 0.7046 / 0.4527
[9/25][100/391] Loss_D: 1.0940 Loss_G: 3.7774 D(x): 0.7260 D(G(z)): 0.4537 / 0.0378
[9/25][150/391] Loss_D: 0.7735 Loss_G: 2.4668 D(x): 0.6324 D(G(z)): 0.1987 / 0.1114
[9/25][200/391] Loss_D: 0.4254 Loss_G: 3.4528 D(x): 0.7089 D(G(z)): 0.0364 / 0.0506
[9/25][250/391] Loss_D: 0.5322 Loss_G: 3.9477 D(x): 0.9021 D(G(z)): 0.2937 / 0.0290
[9/25][300/391] Loss_D: 1.0251 Loss_G: 4.3889 D(x): 0.9307 D(G(z)): 0.5429 / 0.0222
[9/25][350/391] Loss_D: 0.4061 Loss_G: 5.9011 D(x): 0.9050 D(G(z)): 0.2024 / 0.0070
[10/25][0/391] Loss_D: 0.4441 Loss_G: 2.5279 D(x): 0.7693 D(G(z)): 0.1361 / 0.1165
[10/25][50/391] Loss_D: 0.9334 Loss_G: 4.7283 D(x): 0.9363 D(G(z)): 0.5287 / 0.0155
[10/25][100/391] Loss_D: 0.9581 Loss_G: 1.9809 D(x): 0.4961 D(G(z)): 0.1319 / 0.1977
[10/25][150/391] Loss_D: 0.6660 Loss_G: 4.9877 D(x): 0.8577 D(G(z)): 0.3651 / 0.0114
[10/25][200/391] Loss_D: 0.8252 Loss_G: 6.0824 D(x): 0.7890 D(G(z)): 0.3764 / 0.0036
[10/25][250/391] Loss_D: 0.6798 Loss_G: 1.4467 D(x): 0.6205 D(G(z)): 0.0509 / 0.3076
[10/25][300/391] Loss_D: 0.6765 Loss_G: 4.1439 D(x): 0.7701 D(G(z)): 0.2802 / 0.0280
[10/25][350/391] Loss_D: 0.4814 Loss_G: 3.3054 D(x): 0.8703 D(G(z)): 0.2632 / 0.0509
[11/25][0/391] Loss_D: 1.2852 Loss_G: 2.5890 D(x): 0.9778 D(G(z)): 0.6457 / 0.1156
[11/25][50/391] Loss_D: 0.6869 Loss_G: 3.5637 D(x): 0.8851 D(G(z)): 0.3942 / 0.0402
[11/25][100/391] Loss_D: 0.9168 Loss_G: 5.6046 D(x): 0.8650 D(G(z)): 0.4914 / 0.0065
[11/25][150/391] Loss_D: 0.5723 Loss_G: 3.9828 D(x): 0.8913 D(G(z)): 0.3256 / 0.0335
[11/25][200/391] Loss_D: 0.8269 Loss_G: 3.9581 D(x): 0.8369 D(G(z)): 0.4201 / 0.0318
[11/25][250/391] Loss_D: 1.0342 Loss_G: 5.1051 D(x): 0.4431 D(G(z)): 0.0478 / 0.0128
[11/25][300/391] Loss_D: 0.6757 Loss_G: 3.1019 D(x): 0.9836 D(G(z)): 0.4205 / 0.0764
[11/25][350/391] Loss_D: 0.3880 Loss_G: 3.6307 D(x): 0.8219 D(G(z)): 0.1502 / 0.0451
[12/25][0/391] Loss_D: 0.5729 Loss_G: 3.6727 D(x): 0.8226 D(G(z)): 0.2570 / 0.0417
[12/25][50/391] Loss_D: 0.5646 Loss_G: 2.4243 D(x): 0.6678 D(G(z)): 0.0836 / 0.1281
[12/25][100/391] Loss_D: 1.2572 Loss_G: 3.2575 D(x): 0.3789 D(G(z)): 0.0213 / 0.0591
[12/25][150/391] Loss_D: 0.3368 Loss_G: 4.4227 D(x): 0.8767 D(G(z)): 0.1636 / 0.0199
[12/25][200/391] Loss_D: 0.6468 Loss_G: 2.2196 D(x): 0.6249 D(G(z)): 0.0667 / 0.1530
[12/25][250/391] Loss_D: 0.8784 Loss_G: 4.4701 D(x): 0.9581 D(G(z)): 0.5019 / 0.0185
[12/25][300/391] Loss_D: 0.4623 Loss_G: 2.3162 D(x): 0.7541 D(G(z)): 0.1070 / 0.1461
[12/25][350/391] Loss_D: 0.6957 Loss_G: 0.5763 D(x): 0.6132 D(G(z)): 0.0695 / 0.6125
[13/25][0/391] Loss_D: 0.6626 Loss_G: 1.8237 D(x): 0.6792 D(G(z)): 0.1234 / 0.2622
[13/25][50/391] Loss_D: 0.2797 Loss_G: 4.1523 D(x): 0.9043 D(G(z)): 0.1381 / 0.0293
[13/25][100/391] Loss_D: 0.3521 Loss_G: 4.4929 D(x): 0.7411 D(G(z)): 0.0136 / 0.0206
[13/25][150/391] Loss_D: 0.4574 Loss_G: 6.9689 D(x): 0.9538 D(G(z)): 0.2773 / 0.0018
[13/25][200/391] Loss_D: 0.6826 Loss_G: 3.5669 D(x): 0.7504 D(G(z)): 0.2301 / 0.0535
[13/25][250/391] Loss_D: 0.5768 Loss_G: 5.8378 D(x): 0.8813 D(G(z)): 0.3214 / 0.0054
[13/25][300/391] Loss_D: 1.3134 Loss_G: 4.3000 D(x): 0.3648 D(G(z)): 0.0512 / 0.0280
[13/25][350/391] Loss_D: 0.5634 Loss_G: 3.6775 D(x): 0.8748 D(G(z)): 0.3050 / 0.0382
[14/25][0/391] Loss_D: 0.8026 Loss_G: 3.6659 D(x): 0.9420 D(G(z)): 0.4589 / 0.0403
[14/25][50/391] Loss_D: 0.8654 Loss_G: 2.9460 D(x): 0.5867 D(G(z)): 0.2016 / 0.0820
[14/25][100/391] Loss_D: 0.5883 Loss_G: 5.4727 D(x): 0.9123 D(G(z)): 0.3519 / 0.0070
[14/25][150/391] Loss_D: 0.5396 Loss_G: 2.9452 D(x): 0.6305 D(G(z)): 0.0168 / 0.0796
[14/25][200/391] Loss_D: 0.3406 Loss_G: 5.8342 D(x): 0.9562 D(G(z)): 0.2181 / 0.0054
[14/25][250/391] Loss_D: 0.4715 Loss_G: 3.7800 D(x): 0.7414 D(G(z)): 0.1035 / 0.0362
[14/25][300/391] Loss_D: 0.4659 Loss_G: 4.9737 D(x): 0.9630 D(G(z)): 0.3226 / 0.0102
[14/25][350/391] Loss_D: 0.2618 Loss_G: 5.2260 D(x): 0.8954 D(G(z)): 0.1206 / 0.0106
[15/25][0/391] Loss_D: 0.2924 Loss_G: 3.9523 D(x): 0.8447 D(G(z)): 0.0765 / 0.0332
[15/25][50/391] Loss_D: 0.5051 Loss_G: 4.6962 D(x): 0.6867 D(G(z)): 0.0604 / 0.0190
[15/25][100/391] Loss_D: 0.8935 Loss_G: 3.0288 D(x): 0.5650 D(G(z)): 0.1465 / 0.0853
[15/25][150/391] Loss_D: 1.1511 Loss_G: 4.3299 D(x): 0.4168 D(G(z)): 0.0308 / 0.0229
[15/25][200/391] Loss_D: 0.4733 Loss_G: 4.0985 D(x): 0.7037 D(G(z)): 0.0220 / 0.0335
[15/25][250/391] Loss_D: 0.7384 Loss_G: 6.2906 D(x): 0.9348 D(G(z)): 0.4127 / 0.0040
[15/25][300/391] Loss_D: 0.6038 Loss_G: 5.9640 D(x): 0.6256 D(G(z)): 0.0049 / 0.0077
[15/25][350/391] Loss_D: 1.2505 Loss_G: 2.9977 D(x): 0.9399 D(G(z)): 0.6092 / 0.0886
[16/25][0/391] Loss_D: 0.7926 Loss_G: 4.0665 D(x): 0.5237 D(G(z)): 0.0313 / 0.0336
[16/25][50/391] Loss_D: 1.8893 Loss_G: 3.8301 D(x): 0.9569 D(G(z)): 0.7032 / 0.0654
[16/25][100/391] Loss_D: 2.0838 Loss_G: 1.5478 D(x): 0.9866 D(G(z)): 0.8120 / 0.2907
[16/25][150/391] Loss_D: 0.2438 Loss_G: 6.9115 D(x): 0.9549 D(G(z)): 0.1664 / 0.0020
[16/25][200/391] Loss_D: 0.7932 Loss_G: 5.3398 D(x): 0.5500 D(G(z)): 0.0117 / 0.0165
[16/25][250/391] Loss_D: 0.2593 Loss_G: 5.8294 D(x): 0.9189 D(G(z)): 0.1469 / 0.0051
[16/25][300/391] Loss_D: 0.4408 Loss_G: 3.8481 D(x): 0.6924 D(G(z)): 0.0041 / 0.0391
[16/25][350/391] Loss_D: 1.4053 Loss_G: 6.8724 D(x): 0.9881 D(G(z)): 0.6566 / 0.0023
[17/25][0/391] Loss_D: 0.5956 Loss_G: 5.2931 D(x): 0.9743 D(G(z)): 0.3880 / 0.0083
[17/25][50/391] Loss_D: 1.4714 Loss_G: 2.0536 D(x): 0.9919 D(G(z)): 0.6771 / 0.2229
[17/25][100/391] Loss_D: 0.1905 Loss_G: 2.0700 D(x): 0.8852 D(G(z)): 0.0541 / 0.1871
[17/25][150/391] Loss_D: 0.0803 Loss_G: 8.2152 D(x): 0.9669 D(G(z)): 0.0436 / 0.0006
[17/25][200/391] Loss_D: 0.5106 Loss_G: 4.6577 D(x): 0.6775 D(G(z)): 0.0363 / 0.0255
[17/25][250/391] Loss_D: 0.5803 Loss_G: 7.1843 D(x): 0.9714 D(G(z)): 0.3691 / 0.0015
[17/25][300/391] Loss_D: 1.6854 Loss_G: 1.6616 D(x): 0.9866 D(G(z)): 0.7221 / 0.3268
[17/25][350/391] Loss_D: 0.3846 Loss_G: 4.4016 D(x): 0.9381 D(G(z)): 0.2240 / 0.0219
[18/25][0/391] Loss_D: 1.3756 Loss_G: 5.1594 D(x): 0.3709 D(G(z)): 0.0392 / 0.0118
[18/25][50/391] Loss_D: 0.9198 Loss_G: 3.0739 D(x): 0.5385 D(G(z)): 0.1261 / 0.0805
[18/25][100/391] Loss_D: 0.2677 Loss_G: 4.5222 D(x): 0.8235 D(G(z)): 0.0430 / 0.0211
[18/25][150/391] Loss_D: 0.6139 Loss_G: 2.6083 D(x): 0.6515 D(G(z)): 0.0629 / 0.1330
[18/25][200/391] Loss_D: 1.2568 Loss_G: 4.6723 D(x): 0.9849 D(G(z)): 0.6286 / 0.0184
[18/25][250/391] Loss_D: 0.6594 Loss_G: 4.3791 D(x): 0.6099 D(G(z)): 0.0139 / 0.0246
[18/25][300/391] Loss_D: 0.4874 Loss_G: 6.3703 D(x): 0.6786 D(G(z)): 0.0230 / 0.0031
[18/25][350/391] Loss_D: 0.4072 Loss_G: 3.7537 D(x): 0.8381 D(G(z)): 0.1662 / 0.0398
[19/25][0/391] Loss_D: 0.7184 Loss_G: 3.2488 D(x): 0.9490 D(G(z)): 0.4134 / 0.0651
[19/25][50/391] Loss_D: 0.3999 Loss_G: 1.8035 D(x): 0.7737 D(G(z)): 0.0826 / 0.2336
[19/25][100/391] Loss_D: 0.2394 Loss_G: 7.3178 D(x): 0.9530 D(G(z)): 0.1470 / 0.0012
[19/25][150/391] Loss_D: 2.4469 Loss_G: 1.3046 D(x): 0.9988 D(G(z)): 0.8511 / 0.3844
[19/25][200/391] Loss_D: 0.8553 Loss_G: 1.9865 D(x): 0.5518 D(G(z)): 0.0608 / 0.2181
[19/25][250/391] Loss_D: 0.6311 Loss_G: 5.3039 D(x): 0.6093 D(G(z)): 0.0047 / 0.0105
[19/25][300/391] Loss_D: 0.4192 Loss_G: 3.6285 D(x): 0.7643 D(G(z)): 0.0845 / 0.0544
[19/25][350/391] Loss_D: 1.1241 Loss_G: 2.9748 D(x): 0.9944 D(G(z)): 0.5719 / 0.1126
[20/25][0/391] Loss_D: 0.4667 Loss_G: 4.5307 D(x): 0.9087 D(G(z)): 0.2596 / 0.0206
[20/25][50/391] Loss_D: 1.1553 Loss_G: 2.1810 D(x): 0.9875 D(G(z)): 0.5999 / 0.1796
[20/25][100/391] Loss_D: 0.5886 Loss_G: 5.3859 D(x): 0.9614 D(G(z)): 0.3760 / 0.0096
[20/25][150/391] Loss_D: 0.5272 Loss_G: 3.0260 D(x): 0.6884 D(G(z)): 0.0790 / 0.0811
[20/25][200/391] Loss_D: 0.2662 Loss_G: 6.1163 D(x): 0.9716 D(G(z)): 0.1886 / 0.0037
[20/25][250/391] Loss_D: 0.4485 Loss_G: 6.1866 D(x): 0.9942 D(G(z)): 0.3093 / 0.0037
[20/25][300/391] Loss_D: 0.6227 Loss_G: 6.5102 D(x): 0.8946 D(G(z)): 0.3488 / 0.0031
[20/25][350/391] Loss_D: 0.3246 Loss_G: 3.3423 D(x): 0.8437 D(G(z)): 0.1161 / 0.0540
[21/25][0/391] Loss_D: 0.2576 Loss_G: 5.3782 D(x): 0.8897 D(G(z)): 0.1081 / 0.0118
[21/25][50/391] Loss_D: 0.3600 Loss_G: 4.7319 D(x): 0.7508 D(G(z)): 0.0290 / 0.0198
[21/25][100/391] Loss_D: 0.1538 Loss_G: 2.0470 D(x): 0.9020 D(G(z)): 0.0398 / 0.1993
[21/25][150/391] Loss_D: 0.1915 Loss_G: 2.0743 D(x): 0.8628 D(G(z)): 0.0287 / 0.1758
[21/25][200/391] Loss_D: 0.6488 Loss_G: 3.0416 D(x): 0.6207 D(G(z)): 0.0378 / 0.0958
[21/25][250/391] Loss_D: 0.6557 Loss_G: 3.2355 D(x): 0.5774 D(G(z)): 0.0216 / 0.0734
[21/25][300/391] Loss_D: 0.4989 Loss_G: 3.6285 D(x): 0.6722 D(G(z)): 0.0226 / 0.0481
[21/25][350/391] Loss_D: 0.3295 Loss_G: 6.5275 D(x): 0.9698 D(G(z)): 0.2248 / 0.0030
[22/25][0/391] Loss_D: 0.2492 Loss_G: 7.2003 D(x): 0.9415 D(G(z)): 0.1523 / 0.0015
[22/25][50/391] Loss_D: 0.3293 Loss_G: 3.7548 D(x): 0.8281 D(G(z)): 0.0971 / 0.0407
[22/25][100/391] Loss_D: 0.3660 Loss_G: 3.4978 D(x): 0.7546 D(G(z)): 0.0279 / 0.0561
[22/25][150/391] Loss_D: 0.6121 Loss_G: 3.4748 D(x): 0.6660 D(G(z)): 0.0829 / 0.0570
[22/25][200/391] Loss_D: 1.0981 Loss_G: 3.0244 D(x): 0.4437 D(G(z)): 0.0537 / 0.0987
[22/25][250/391] Loss_D: 0.2267 Loss_G: 4.2594 D(x): 0.9274 D(G(z)): 0.1211 / 0.0248
[22/25][300/391] Loss_D: 0.5179 Loss_G: 1.9090 D(x): 0.6822 D(G(z)): 0.0623 / 0.2237
[22/25][350/391] Loss_D: 0.1739 Loss_G: 8.8477 D(x): 0.9468 D(G(z)): 0.0997 / 0.0006
[23/25][0/391] Loss_D: 0.2159 Loss_G: 4.5522 D(x): 0.8660 D(G(z)): 0.0485 / 0.0232
[23/25][50/391] Loss_D: 0.3977 Loss_G: 5.2742 D(x): 0.8694 D(G(z)): 0.1707 / 0.0222
[23/25][100/391] Loss_D: 1.6703 Loss_G: 2.6393 D(x): 0.9988 D(G(z)): 0.7380 / 0.1169
[23/25][150/391] Loss_D: 0.1252 Loss_G: 4.4584 D(x): 0.9371 D(G(z)): 0.0527 / 0.0188
[23/25][200/391] Loss_D: 1.2977 Loss_G: 5.2901 D(x): 0.3717 D(G(z)): 0.0069 / 0.0116
[23/25][250/391] Loss_D: 0.2397 Loss_G: 4.6750 D(x): 0.8230 D(G(z)): 0.0204 / 0.0232
[23/25][300/391] Loss_D: 0.6405 Loss_G: 2.6863 D(x): 0.6085 D(G(z)): 0.0197 / 0.1433
[23/25][350/391] Loss_D: 0.1148 Loss_G: 5.4325 D(x): 0.9669 D(G(z)): 0.0700 / 0.0095
[24/25][0/391] Loss_D: 0.4317 Loss_G: 3.8536 D(x): 0.7079 D(G(z)): 0.0157 / 0.0548
[24/25][50/391] Loss_D: 0.2910 Loss_G: 3.1418 D(x): 0.7984 D(G(z)): 0.0356 / 0.0801
[24/25][100/391] Loss_D: 1.4918 Loss_G: 7.0066 D(x): 0.3204 D(G(z)): 0.0021 / 0.0020
[24/25][150/391] Loss_D: 0.4612 Loss_G: 4.2994 D(x): 0.8778 D(G(z)): 0.2305 / 0.0260
[24/25][200/391] Loss_D: 0.2545 Loss_G: 8.3412 D(x): 0.9609 D(G(z)): 0.1489 / 0.0006
[24/25][250/391] Loss_D: 0.2800 Loss_G: 3.2279 D(x): 0.8750 D(G(z)): 0.0689 / 0.0649
[24/25][300/391] Loss_D: 2.0958 Loss_G: 5.4194 D(x): 0.2136 D(G(z)): 0.0153 / 0.0093
[24/25][350/391] Loss_D: 0.1916 Loss_G: 7.8124 D(x): 0.9433 D(G(z)): 0.1104 / 0.0009
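# G_losses and D_losses were saved "for plotting later"; a minimal plotting
# sketch using the lists collected in the training loop above:
plt.figure(figsize=(10, 5))
plt.title("Generator and Discriminator Loss During Training")
plt.plot(G_losses, label="G")
plt.plot(D_losses, label="D")
plt.xlabel("iterations")
plt.ylabel("loss")
plt.legend()
plt.show()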
import matplotlib.animation as animation
from IPython.display import HTML
fig = plt.figure(figsize=(8,8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i,(1,2,0)), animated=True)] for i in img_list]
ani = animation.ArtistAnimation(fig, ims, interval=1000, repeat_delay=1000, blit=True)
HTML(ani.to_jshtml())
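# Hedged follow-up sketch: persisting the trained networks for later reuse
# via their state_dicts. The file names are arbitrary choices, not something
# fixed by this notebook.
torch.save(netG.state_dict(), os.path.join(os.getcwd(), "netG.pth"))
torch.save(netD.state_dict(), os.path.join(os.getcwd(), "netD.pth"))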